Yesterday we looked at using eventrouter to collect the events in a Kubernetes cluster; today we ship the events that eventrouter collects into Elasticsearch. I won't go over deploying Elasticsearch again, but this time Logstash is used to clean up the data before it reaches Elasticsearch, so Logstash has to be deployed as well. The overall flow is: eventrouter writes events to a log file, a filebeat sidecar tails that file and forwards it to Logstash, and Logstash parses each line and indexes it into Elasticsearch.
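For reference, the raw material is nothing exotic: these are the same events you can list with a standard kubectl command; eventrouter just watches them continuously and writes each one out as a log line:

kubectl get events -A --sort-by=.metadata.creationTimestamp

The manifests below deploy eventrouter together with a filebeat sidecar, plus the ServiceAccount, RBAC objects, and ConfigMaps they need.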
apiVersion: v1
kind: ServiceAccount
metadata:
  name: eventrouter
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: eventrouter
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: eventrouter
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: eventrouter
subjects:
- kind: ServiceAccount
  name: eventrouter
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: eventrouter-cm
  namespace: kube-system
data:
  config.json: |-
    {
      "sink": "glog"
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: log
      paths:
      - "/data/log/eventrouter/*"
    # output.elasticsearch:
    #   hosts: ["elasticsearch-logging:9200"]
    output.logstash:
      hosts: ["logstash:5044"]
      enabled: true
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: eventrouter
  namespace: kube-system
  labels:
    app: eventrouter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: eventrouter
  template:
    metadata:
      labels:
        app: eventrouter
        tier: control-plane-addons
    spec:
      containers:
      - name: kube-eventrouter
        image: baiyongjie/eventrouter:v0.2
        command:
        - "/bin/sh"
        args:
        - "-c"
        - "/eventrouter -v 3 -log_dir /data/log/eventrouter"
        volumeMounts:
        - name: eventrouter-cm
          mountPath: /etc/eventrouter
        - name: log-path
          mountPath: /data/log/eventrouter
      - name: filebeat
        image: docker.io/kubeimages/filebeat:7.9.3
        args: [
          "-c", "/etc/filebeat.yml",
          "-e", "-httpprof", "0.0.0.0:6060"
        ]
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-logging
        - name: ELASTICSEARCH_PORT
          value: "9200"
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 1000Mi
            cpu: 1000m
          requests:
            memory: 100Mi
            cpu: 100m
        volumeMounts:
        - name: filebeat-config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: log-path
          mountPath: /data/log/eventrouter
      serviceAccount: eventrouter
      volumes:
      - name: eventrouter-cm
        configMap:
          name: eventrouter-cm
      - name: filebeat-config
        configMap:
          name: filebeat-config
      - name: log-path
        emptyDir: {}
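Once the manifests above are applied, a quick sanity check is to confirm that events are actually landing in the shared emptyDir. The file name eventrouter.yaml below is just a placeholder for whatever you saved the manifests as:

kubectl apply -f eventrouter.yaml
kubectl -n kube-system get pods -l app=eventrouter
# list the log files from inside the filebeat sidecar
kubectl -n kube-system exec deploy/eventrouter -c filebeat -- ls /data/log/eventrouter

Next comes Logstash, which filebeat forwards to: a headless Service for the beats port, the Deployment itself, and two ConfigMaps holding its pipeline and settings.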
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: kube-system
spec:
  ports:
  - port: 5044
    targetPort: beats
  selector:
    type: logstash
  clusterIP: None
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: kube-system
spec:
  selector:
    matchLabels:
      type: logstash
  template:
    metadata:
      labels:
        type: logstash
        srv: srv-logstash
    spec:
      containers:
      - image: docker.io/kubeimages/logstash:7.9.3
        name: logstash
        ports:
        - containerPort: 5044
          name: beats
        command:
        - logstash
        - '-f'
        - '/etc/logstash_c/logstash.conf'
        env:
        - name: "XPACK_MONITORING_ELASTICSEARCH_HOSTS"
          value: "http://elasticsearch-logging:9200"
        volumeMounts:
        - name: config-volume
          mountPath: /etc/logstash_c/
        - name: config-yml-volume
          mountPath: /usr/share/logstash/config/
        - name: timezone
          mountPath: /etc/localtime
        resources:
          limits:
            cpu: 1000m
            memory: 2048Mi
          requests:
            cpu: 512m
            memory: 512Mi
      volumes:
      - name: config-volume
        configMap:
          name: logstash-conf
          items:
          - key: logstash.conf
            path: logstash.conf
      - name: timezone
        hostPath:
          path: /etc/localtime
      - name: config-yml-volume
        configMap:
          name: logstash-yml
          items:
          - key: logstash.yml
            path: logstash.yml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-conf
  namespace: kube-system
  labels:
    type: logstash
data:
  logstash.conf: |-
    input {
      beats {
        port => 5044
      }
    }
    filter {
      grok {
        match => {
          "message" => '%{DATA:uselessdata}\] %{GREEDYDATA:data}'
        }
      }
      json {
        source => "data"
      }
    }
    output {
      elasticsearch {
        hosts => ["http://elasticsearch-logging:9200"]
        codec => json
        index => "logstash-%{+YYYY.MM.dd}"
      }
    }
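The grok filter is there because the glog sink writes each event with a glog header in front of the JSON payload; the pattern throws away everything up to the closing ] of that header and hands the remainder to the json filter. A line on disk looks roughly like this (an illustrative sample of the glog format, not output copied from a real cluster):

I0914 06:15:22.123456       1 eventrouter.go:123] {"verb":"ADDED","event":{ ... }}

Finally, the logstash.yml settings file, which is mounted over the image's default config directory: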
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-yml
  namespace: kube-system
  labels:
    type: logstash
data:
  logstash.yml: |-
    http.host: "0.0.0.0"
    xpack.monitoring.elasticsearch.hosts: http://elasticsearch-logging:9200
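After applying the Logstash manifests, events should start showing up in a daily logstash-YYYY.MM.dd index. One way to check (the file name logstash.yaml is again a placeholder, and this assumes elasticsearch-logging lives in kube-system like everything else here):

kubectl apply -f logstash.yaml
kubectl -n kube-system logs deploy/logstash --tail=20
# query Elasticsearch from a throwaway pod with curl
kubectl -n kube-system run es-check --rm -it --restart=Never --image=curlimages/curl --command -- \
  curl -s "http://elasticsearch-logging:9200/_cat/indices/logstash-*?v"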
One last piece: the stock eventrouter image writes events to stderr, so to get them into a file that the filebeat sidecar can tail, the image has to be rebuilt from a modified Dockerfile. The upstream Dockerfile looks like this:
FROM openshift/origin-release:golang-1.14 AS build
COPY . /go/src/github.com/openshift/eventrouter
RUN cd /go/src/github.com/openshift/eventrouter && go build .
FROM centos:7
COPY --from=build /go/src/github.com/openshift/eventrouter/eventrouter /bin/eventrouter
CMD ["/bin/eventrouter", "-v", "3", "-logtostderr"]
LABEL version=v0.3

And this is the modified Dockerfile: it creates the log directory and changes the CMD so that events are written under /data/log/eventrouter instead:

FROM openshift/origin-release:golang-1.14 AS build
COPY . /go/src/github.com/openshift/eventrouter
RUN cd /go/src/github.com/openshift/eventrouter && go build .
FROM centos:7
RUN mkdir -p /data/log/eventrouter
COPY --from=build /go/src/github.com/openshift/eventrouter/eventrouter /bin/eventrouter
CMD ["/bin/eventrouter", "-v", "3", "-log_dir", "/data/log/eventrouter"]
With an image built this way, the command and args override in the eventrouter Deployment above can also be removed, since the image's CMD already writes to /data/log/eventrouter.
That wraps up the topic of monitoring the cluster's resource usage, logs, and events. Feel free to raise any comments or questions. Since I'm not that familiar with the ELK side of things, corrections for any mistakes are also very welcome. Thank you all :)